Artificial Neural Networks (namespace Nn)


ConflictInfo: It stores information about a conflict between two rows in a data set
ConflictInfo()
bool operator <(const ConflictInfo& init)const
~ ConflictInfo()

ConvLayer
ConvLayer& operator =(const ConvLayer& init)
ConvLayer()
ConvLayer(const ConvLayer& init)
bool AllocateMemoryForTraining(size_t numThreads, bool includeGradient, bool includeStochastic)
bool Create(int layerType, size_t depth, int activationFuncType, size_t visualField, size_t pad, size_t stride)
layerType: NN_LAYTYPE_CONV, NN_LAYTYPE_FULLCN, NN_LAYTYPE_LOCALCN, NN_LAYTYPE_POOL. activationFuncType: NN_AFTYPE_LINEAR, NN_AFTYPE_LOGSIG, NN_AFTYPE_TANH, NN_AFTYPE_RELU, NN_AFTYPE_SOFTMAX, NN_AFTYPE_MAX, NN_AFTYPE_AVG. If layerType = NN_LAYTYPE_CONV, then depth is the number of filters (see the usage sketch after this class listing)
bool Load(Sys::File& file, size_t numThreads)
bool Save(Sys::File& file)const
bool SetActivationFuncType(int activationFuncType)
activationFuncType: NN_AFTYPE_LINEAR, NN_AFTYPE_LOGSIG, NN_AFTYPE_TANH, NN_AFTYPE_RELU, NN_AFTYPE_SOFTMAX, NN_POOL_MAX, NN_POOL_AVG
bool SetInputSize(size_t width, size_t height, size_t depth, size_t numThreads)
double OutputDerivative(double activation)
int GetActivationFuncType()
It returns: NN_AFTYPE_LINEAR, NN_AFTYPE_LOGSIG, NN_AFTYPE_TANH, NN_AFTYPE_RELU, NN_AFTYPE_SOFTMAX, NN_POOL_MAX, NN_POOL_AVG
int GetLayerType() const
It returns: NN_LAYTYPE_CONV, NN_LAYTYPE_FULLCN, NN_LAYTYPE_LOCALCN, NN_LAYTYPE_POOL
int GetStdRange() const
It returns: NN_STD_RANGE_NONE, NN_STD_RANGE_MINUS1_1, NN_STD_RANGE_0_1, NN_STD_RANGE_MINUS09_09, NN_STD_RANGE_01_09
size_t GetDepth() const
size_t GetHeight() const
size_t GetNumNeurons() const
size_t GetNumOutputs() const
size_t GetNumWeights() const
size_t GetWidth() const
void AcumulateGradient(double factor)
void Agitate(double perturbRatio, Nn::ConvLayer& source)
void ComputeDelta(const Nn::ConvLayer& nextLayer, size_t threadIndex)
void ComputeGradient(const Sys::Tensor& prevActivation, size_t threadIndex)
void ComputeOutput(const Sys::Tensor& input, size_t threadIndex)
For the given input, it computes the activation of the neurons in the layer. When threadIndex = 0, the result is stored in activation[0]; when threadIndex = 1, in activation[1]; and so on.
void ComputeWeightPenalty()
void CopyBestIndividual(Math::GeneticAlgorithm& ga, size_t& index)
void CopyWeights(const Nn::ConvLayer& source)
To improve performance, no error checking is done
void Delete()
void GeneticInitialize(Math::GeneticIndividual& individual, size_t& index)
void GeneticSetFromBits(const Math::GeneticIndividual& individual, size_t& index)
void GetDescription(wstring& out_description)const
void Initialize()
void MoveDown()
void ReleaseMemoryForTraining()
void ResetDelta(size_t threadIndex)
void ResetGradient()
void ResetPreviousGradient()
void SetLearningRate(double learnRate)
void StorePreviousGradient()
void UpdateLearningRate(double initialLearnRate, double maxLearnRate, double growthLearnRate)
~ ConvLayer()
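
Example: a minimal sketch of creating a convolutional layer with the members listed above. The header name Nn.h, the 28x28x1 input volume, the filter settings and the single working thread are illustrative assumptions, not values taken from this listing.

    #include "Nn.h"   // assumed header exposing namespace Nn

    bool BuildConvLayer(Nn::ConvLayer& layer)
    {
        // 16 filters with a 5x5 visual field, no padding, stride 1 (illustrative values)
        if (layer.Create(NN_LAYTYPE_CONV, 16, NN_AFTYPE_RELU, 5, 0, 1) == false) return false;
        // Input volume (width, height, depth) and number of working threads
        if (layer.SetInputSize(28, 28, 1, 1) == false) return false;
        return true;
    }

Once both calls succeed, ComputeOutput(input, 0) stores the layer activation for thread 0, as described above.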

DeepLayer
DeepLayer& operator =(const DeepLayer& init)
DeepLayer()
DeepLayer(const DeepLayer& init)
bool AllocateMemoryForGradient(size_t numThreads)
bool Create(size_t numNeurons, int activationFuncType, size_t numThreads)
activationFuncType: NN_AFTYPE_LINEAR, NN_AFTYPE_LOGSIG, NN_AFTYPE_TANH, NN_AFTYPE_RELU, NN_AFTYPE_SOFTMAX (see the usage sketch after this class listing)
bool Load(Sys::File& file, size_t numThreads)
bool Save(Sys::File& file)const
bool SetActivationFuncType(int activationFuncType)
activationFuncType: NN_AFTYPE_LINEAR, NN_AFTYPE_LOGSIG, NN_AFTYPE_TANH, NN_AFTYPE_RELU, NN_AFTYPE_SOFTMAX
bool SetNumInputs(size_t numInputs)
double OutputDerivative(double activation)
int GetActivationFuncType()
int GetStdRange() const
It returns: NN_STD_RANGE_NONE, NN_STD_RANGE_MINUS1_1, NN_STD_RANGE_0_1, NN_STD_RANGE_MINUS09_09, NN_STD_RANGE_01_09
size_t GetNumInputs() const
size_t GetNumNeurons() const
size_t GetNumWeights() const
void Agitate(double perturbRatio, const Nn::DeepLayer& source)
void ComputeOutput(const valarray<double >& input, size_t threadIndex)
For the given input, it computes the activation of the neurons in the layer. When threadIndex = 0, the result is stored in activation[0]; when threadIndex = 1, in activation[1]; and so on.
void ComputeWeightPenalty()
void CopyBestIndividual(Math::GeneticAlgorithm& ga, size_t& index)
void Delete()
void GeneticInitialize(Math::GeneticIndividual& individual, size_t& index)
void GeneticSetFromBits(const Math::GeneticIndividual& individual, size_t& index)
void Initialize()
void ReleaseMemoryForGradient()
void ResetGradient()
~ DeepLayer()
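
Example: the same pattern for a fully connected layer; the sizes are illustrative and Nn.h is an assumed header name.

    #include <valarray>
    #include "Nn.h"   // assumed header
    using namespace std;

    void RunDeepLayer()
    {
        Nn::DeepLayer layer;
        if (layer.Create(64, NN_AFTYPE_TANH, 1) == false) return;   // 64 neurons, one thread
        if (layer.SetNumInputs(100) == false) return;
        valarray<double> input(100);          // zero-initialized input for the sketch
        layer.ComputeOutput(input, 0);        // result is stored in activation[0]
    }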

KohoNet: An unsupervised artificial neural network (Kohonen ANN)
KohoNet& operator =(const KohoNet& init)
KohoNet()
KohoNet(const KohoNet& init)
bool AutoSetInputScaler(MATRIX& input)
bool Create(int numInputs, int numOutputs, int inputNormType)
bool GetInputScaler(int index, double& minimum, double& maximum)
bool SetInputName(int index, const wchar_t* name)
bool SetInputScaler(int index, double minimum, double maximum)
bool SetWeights(const MATRIX& weights)
const wchar_t* ComputeWinner(const MATRIX& input, valarray<double >& output)
const wchar_t* ComputeWinner(const MATRIX& input, valarray<int >& output)
const wchar_t* GetInputName(int index)const
const wchar_t* Load(const wchar_t* filename)
const wchar_t* Run(const MATRIX& input, MATRIX& output)
const wchar_t* Save(const wchar_t* filename)
const wchar_t* ScaleInputDataSet(const MATRIX& input, MATRIX& scaledInput, bool ignoreWarnings)
const wchar_t* SetTrainingSet(const MATRIX& trainSetIn, bool ignoreWarnings)
const wchar_t* TrainAdditive(Mt::ThreadLink& threadLink, Mt::DoubleTs& error, double learningRate, int numIterations)
const wchar_t* TrainSubtractive(Mt::ThreadLink& threadLink, Mt::DoubleTs& error, double learningRate, int numIterations)
int ComputeWinner(int trainCaseIndex)
int GetInputCount() const
int GetOutputCount() const
void Copy(const KohoNet& init)
void Delete()
void GetDescription(wchar_t* description, int length)
void GetNormalizedInput(MATRIX& normInput)
void GetWeights(MATRIX& weights)
void Unlearn()
~ KohoNet()
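
Example: a minimal training sketch. Several points are assumptions rather than facts from this listing: MATRIX is taken to be vector<vector<double> >, Mt::ThreadLink and Mt::DoubleTs are taken to be default-constructible, inputNormType = 0 is a placeholder, and a non-NULL const wchar_t* return is read as an error message.

    #include <vector>
    #include "Nn.h"   // assumed header
    using namespace std;

    void TrainKohonen()
    {
        MATRIX input(100, vector<double>(4));   // 100 training cases, 4 inputs each
        // ... fill input ...
        Nn::KohoNet net;
        if (net.Create(4, 3, 0) == false) return;          // 4 inputs, 3 outputs
        net.AutoSetInputScaler(input);
        if (net.SetTrainingSet(input, false) != NULL) return;
        Mt::ThreadLink threadLink;
        Mt::DoubleTs error;
        net.TrainAdditive(threadLink, error, 0.1, 1000);   // learningRate = 0.1, 1000 iterations
    }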

Layer: One layer of an artificial neural network
Layer& operator =(const Layer& init)
Layer(const Layer& init)
Layer(void)
double OutputDerivative(const size_t index)const
void Agitate(double perturbRatio, Nn::Layer& source)
void ComputeOutput(const MATRIX& input, size_t rowInputIndex)
It computes the output for the input in row rowInputIndex; the output has only one row
void Copy(const Layer& init)
void Delete()
void GeneticInitialize(Math::GeneticIndividual& individual, size_t& index)
void Initialize()
~ Layer(void)

LayerNet: A multi-layer artificial neural network
LayerNet& operator =(const LayerNet& init)
LayerNet()
LayerNet(const LayerNet& init)
bool AutoSetInputScaler(MATRIX& input)
bool AutoSetOutputScaler(MATRIX& output)
bool Create(size_t inputCount, size_t hidden1Count, size_t hidden2Count, size_t outputCount)
bool GetActivation(size_t layerIndex, const MATRIX& input, MATRIX& out_activation)
layerIndex: 0, 1, 2, ..., numLayers-1. With 1 layer: layerIndex=0 (Output). With 2 layers: layerIndex=0 (Hidden1), layerIndex=1 (Output). With 3 layers: layerIndex=0 (Hidden1), layerIndex=1 (Hidden2), layerIndex=2 (Output).
bool GetActivation(size_t layerIndex, valarray<double >& out_activation)
layerIndex: 0, 1, 2, ..., numLayers-1. With 1 layer: layerIndex=0 (Output). With 2 layers: layerIndex=0 (Hidden1), layerIndex=1 (Output). With 3 layers: layerIndex=0 (Hidden1), layerIndex=1 (Hidden2), layerIndex=2 (Output).
bool GetInputScaler(int index, double& minimum, double& maximum)
bool GetOutputScaler(int index, double& minimum, double& maximum)
bool GetWeights(size_t layerIndex, MATRIX& out_weights)
layerIndex: 0, 1, 2, ..., numLayers-1. With 1 layer: layerIndex=0 (Output). With 2 layers: layerIndex=0 (Hidden1), layerIndex=1 (Output). With 3 layers: layerIndex=0 (Hidden1), layerIndex=1 (Hidden2), layerIndex=2 (Output).
bool Run(const MATRIX& input, MATRIX& output)
bool SetInputName(int index, const wchar_t* name)
bool SetInputScaler(int index, double minimum, double maximum)
bool SetOutputName(int index, const wchar_t* name)
bool SetOutputScaler(int index, double minimum, double maximum)
bool SetWeights(size_t layerIndex, const MATRIX& weights)
layerIndex: 0, 1, 2, ..., numLayers-1. With 1 layer: layerIndex=0 (Output). With 2 layers: layerIndex=0 (Hidden1), layerIndex=1 (Output). With 3 layers: layerIndex=0 (Hidden1), layerIndex=1 (Hidden2), layerIndex=2 (Output).
const wchar_t* GetInputName(int index)
const wchar_t* GetOutputName(int index)
const wchar_t* GetScaledOutput(MATRIX& scaledOutput)
const wchar_t* Load(const wchar_t* filename)
const wchar_t* Save(const wchar_t* filename)
const wchar_t* ScaleInputDataSet(const MATRIX& input, MATRIX& scaledInput, bool ignoreWarnings)
const wchar_t* ScaleOutputDataSet(const MATRIX& output, MATRIX& scaledOutput, bool ignoreWarnings)
const wchar_t* SetTrainingSet(const MATRIX& trainSetIn, const MATRIX& trainSetTarget, bool ignoreWarnings)
const wchar_t* TrainConjugateGradient(Mt::ThreadLink& threadLink, Mt::DoubleTs& mse, int epochs, double goal)
const wchar_t* TrainGenetic(Mt::ThreadLink& threadLink, Mt::DoubleTs& mse, Math::GeneticParam& param)
const wchar_t* TrainLevenbergMarquardt(Mt::ThreadLink& threadLink, Mt::DoubleTs& mse, int epochs, double goal)
const wchar_t* TrainRegression(Mt::ThreadLink& threadLink, Mt::DoubleTs& mse)
const wchar_t* TrainSimAnneal(Mt::ThreadLink& threadLink, Mt::DoubleTs& mse, Math::SimAnnealParam& param)
const wchar_t* TrainVariableMetric(Mt::ThreadLink& threadLink, Mt::DoubleTs& mse, int epochs, double goal)
double ComputeError()
double EvaluateFunc(const valarray<double >& x)
double GeneticGetError()
double LevenMar(MATRIX& input, int inputRow, int idep, double target, MATRIX& alpha, valarray<double >& beta, valarray<double >& hid2delta, valarray<double >& grad)
double LevenMarComputeHessianAndGradient(valarray<double >& hid2delta, valarray<double >& grad, MATRIX& hessian, valarray<double >& beta, Mt::ThreadLink& threadLink)
double SimAnnealGetError()
size_t GetHidden1NeuronCount() const
size_t GetHidden2NeuronCount() const
size_t GetMinNumTrainCases()
double ComputeTrueMse(const MATRIX& trainSet_in, const MATRIX& trainSet_target)
double ComputeCurrentTrueMse()
size_t GetNumInputs() const
size_t GetNumLayers() const
numLayers = 1 > Output Layer. numLayers = 2 > Hidden1 and Output Layer. numLayers = 3 > Hidden1, Hidden2 and Output Layer.
size_t GetNumNeurons(size_t layerIndex)const
size_t GetNumOutputs() const
static bool IsPredictionOverfitting(int seriesLength, int numInputs, int numHid)
static void ComputeBestPrediction(int seriesLength, const MATRIX& mse, int& out_row, int& out_col)
void ComputeOutput(const MATRIX& input, size_t inputRowIndex, size_t numLayers)
void Copy(const LayerNet& init)
void Delete()
void EvaluateFuncAndGrad(const valarray<double >& x, double& Fx, valarray<double >& gradient)
void EvaluateGrad(const valarray<double >& x, valarray<double >& outGrad)
void GeneticInitialize(Math::GeneticIndividual& individual)
void GeneticSetFromBits(const Math::GeneticIndividual& individual)
void GetDescription(wchar_t* description, int length)
void LevenMarMove(double step, valarray<double >& direction)
void SimAnnealCopy(const Math::ISimAnneal& source)
void SimAnnealInitialize()
void SimAnnealPerturb(Math::ISimAnneal& original, double temperature, double initialTemperature)
void Unlearn()
~ LayerNet()
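
Example: a minimal train-and-run sketch under the same assumptions as the KohoNet example (MATRIX as vector<vector<double> >, default-constructible Mt types, non-NULL returns read as error text); the network sizes and training parameters are illustrative.

    #include <vector>
    #include "Nn.h"   // assumed header
    using namespace std;

    void TrainLayerNet()
    {
        MATRIX trainIn(200, vector<double>(8));        // 200 cases, 8 inputs
        MATRIX trainTarget(200, vector<double>(2));    // 200 cases, 2 outputs
        // ... fill trainIn and trainTarget ...
        Nn::LayerNet net;
        if (net.Create(8, 12, 0, 2) == false) return;  // one hidden layer with 12 neurons
        net.AutoSetInputScaler(trainIn);
        net.AutoSetOutputScaler(trainTarget);
        if (net.SetTrainingSet(trainIn, trainTarget, false) != NULL) return;
        Mt::ThreadLink threadLink;
        Mt::DoubleTs mse;
        net.TrainConjugateGradient(threadLink, mse, 1000, 1.0e-6);   // 1000 epochs, goal 1.0e-6
        MATRIX output;
        net.Run(trainIn, output);
    }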

Logsig: High-performance class to compute y = 1.0/(1.0+exp(-x))
Logsig()
double Derivative(double y)const
double Func(double x)const
static double InverseFunc(double y)
~ Logsig()
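
Because Derivative receives the activation y instead of x, it can presumably use the logistic identity dy/dx = y*(1 - y); the inverse is x = -ln(1/y - 1). A small sketch (Nn.h is an assumed header name):

    #include "Nn.h"   // assumed header

    void LogsigDemo()
    {
        Nn::Logsig logsig;
        double y = logsig.Func(0.5);             // y = 1.0/(1.0 + exp(-0.5)), about 0.6225
        double dy = logsig.Derivative(y);        // logistic identity: dy/dx = y*(1.0 - y)
        double x = Nn::Logsig::InverseFunc(y);   // x = -log(1.0/y - 1.0), recovers 0.5
    }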

ProbNet: A probabilistic artificial neural network
ProbNet& operator =(const ProbNet& init)
ProbNet()
ProbNet(const ProbNet& init)
const wchar_t* Load(const wchar_t* filename)
const wchar_t* Run(const MATRIX& trainSetInput, const MATRIX& trainSetTarget, const MATRIX& input, MATRIX& output)
const wchar_t* Save(const wchar_t* filename)
const wchar_t* TrainConjugateGradient(Mt::ThreadLink& threadLink, Mt::DoubleTs& mse, const MATRIX& trainSetInput, const MATRIX& trainSetTarget, int epochs, double goal)
const wchar_t* TrainVariableMetric(Mt::ThreadLink& threadLink, Mt::DoubleTs& mse, const MATRIX& trainSetInput, const MATRIX& trainSetTarget, int epochs, double goal)
double EvaluateFunc(const double x)
double EvaluateFunc(const valarray<double >& x)
int GetInputCount()
int GetOutputCount()
void Copy(const ProbNet& init)
void Delete()
void EvaluateFuncAndDeriv(const double x, double& Fx, double& dFx)
void EvaluateFuncAndGrad(const valarray<double >& x, double& Fx, valarray<double >& gradient)
void GetDescription(wchar_t* description, int length)
void GetWeights(valarray<double >& weights)
void SetWeights(const valarray<double >& weights)
~ ProbNet()
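
Example: note that Run receives the training set directly, so the training data must also be available at prediction time. The sketch uses the same assumptions as above (MATRIX as vector<vector<double> >, default-constructible Mt types); sizes and parameters are illustrative.

    #include <vector>
    #include "Nn.h"   // assumed header
    using namespace std;

    void RunProbNet()
    {
        MATRIX trainIn(150, vector<double>(4)), trainTarget(150, vector<double>(3));
        MATRIX testIn(10, vector<double>(4)), output;
        // ... fill trainIn, trainTarget and testIn ...
        Nn::ProbNet net;
        Mt::ThreadLink threadLink;
        Mt::DoubleTs mse;
        net.TrainConjugateGradient(threadLink, mse, trainIn, trainTarget, 500, 1.0e-6);
        net.Run(trainIn, trainTarget, testIn, output);
    }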

Rbm: Restricted Boltzmann Machine
Nn::Rbm& operator =(const Nn::Rbm& init)
Rbm()
Rbm(const Nn::Rbm& init)
bool Create(size_t numNeurons, int activationFuncType)
activationFuncType: NN_AFTYPE_LOGSIG, NN_AFTYPE_TANH, NN_AFTYPE_RELU
bool Load(Sys::File& file, size_t numThreads)
bool Save(Sys::File& file)const
bool SetNumInputs(size_t numInputs, size_t numThreads)
double ComputeReconstructionError(const MATRIX& input, int errorType)
errorType: NN_ERROR_MSE, NN_ERROR_CROSSENTROPY
double ComputeReconstructionError(const MATRIX& input, int errorType, size_t threadIndex, size_t numThreads)
errorType: NN_ERROR_MSE, NN_ERROR_CROSSENTROPY
double GetMaxWeight() const
double MoveDown(const valarray<double >& inputMean, double momentum)
It returns the maximum increment
double MoveDown(const valarray<double >& inputMean, double momentum, double sparsityPenalty, double sparsityTarget)
It returns the maximum increment
double ReconstructionError(const valarray<double >& input, const valarray<double >& hidden, int errorType)
int GetActFuncType() const
It returns: NN_AFTYPE_LOGSIG, NN_AFTYPE_TANH, NN_AFTYPE_RELU
int GetStdRange() const
It returns: NN_STD_RANGE_NONE, NN_STD_RANGE_MINUS1_1, NN_STD_RANGE_0_1, NN_STD_RANGE_MINUS09_09, NN_STD_RANGE_01_09
size_t GetNumInputs() const
size_t GetNumNeurons() const
void ActivateHiddenUnits(const valarray<double >& input, size_t threadIndex)
For the given input, it computes the activation of the hidden units. When threadIndex = 0, the result is stored in hiddenActivation[0]; when threadIndex = 1, in hiddenActivation[1]; and so on.
void ActivateVisibleUnits(const valarray<double >& input, size_t threadIndex)
For the given input, it computes the activation of the visible units. When threadIndex = 0, the result is stored in visibleActivation[0]; when threadIndex = 1, in visibleActivation[1]; and so on.
void CopyData(const Nn::Rbm& source)
void Delete()
void Initialize(const valarray<double >& inputMean, size_t threadIndex)
void PrepareInput(MATRIX& input)
void ResetConstrativeDivergence(double initialLearnRate)
void ResetGradient()
void Sampling(const valarray<double >& in, valarray<double >& out, size_t threadIndex)
void Sampling(valarray<double >& inout, size_t threadIndex)
void UpdateLearning(bool isFirstEpoch, double initialLearnRate, double maxLearnRate, double growthLearnRate)
void UpdateOnRate(size_t threadIndex)
~ Rbm()
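
Example: a minimal setup sketch. The layer sizes and learning rate are illustrative, MATRIX is again taken to be vector<vector<double> >, and the call order beyond Create/SetNumInputs is an assumption.

    #include <vector>
    #include "Nn.h"   // assumed header
    using namespace std;

    void SetUpRbm()
    {
        Nn::Rbm rbm;
        if (rbm.Create(64, NN_AFTYPE_LOGSIG) == false) return;   // 64 hidden neurons
        if (rbm.SetNumInputs(784, 1) == false) return;           // 784 inputs, one thread
        MATRIX input(100, vector<double>(784));
        // ... fill input ...
        rbm.PrepareInput(input);
        rbm.ResetConstrativeDivergence(0.01);                    // initialLearnRate = 0.01
        double error = rbm.ComputeReconstructionError(input, NN_ERROR_MSE);   // error before training
    }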

RbmProgress
RbmProgress()
~ RbmProgress()

Scaler: It stores an array of "ScalingInfo" elements to scale the input or output of an artificial neural network
Scaler& operator =(const Nn::Scaler& init)
Scaler()
Scaler(const Nn::Scaler& init)
bool AutoSet(const MATRIX& matrix)
bool Create(size_t count)
bool Get(size_t index, Nn::ScalingInfo& out_si)const
bool Get(size_t index, double& out_minimum, double& out_maximum)const
bool Load(Sys::File& file)
bool Save(Sys::File& file)const
bool ScaleFromStdRange(int stdRange, const valarray<double >& input, valarray<double >& output)const
stdRange: NN_STD_RANGE_NONE, NN_STD_RANGE_MINUS1_1 (from -1 to 1), NN_STD_RANGE_0_1 (from 0 to 1), NN_STD_RANGE_MINUS09_09 (from -0.9 to 0.9), NN_STD_RANGE_01_09 (from 0.1 to 0.9)
bool ScaleToStdRange(int stdRange, const valarray<double >& input, valarray<double >& output)const
stdRange: NN_STD_RANGE_NONE, NN_STD_RANGE_MINUS1_1 (from -1 to 1), NN_STD_RANGE_0_1 (from 0 to 1), NN_STD_RANGE_MINUS09_09 (from -0.9 to 0.9), NN_STD_RANGE_01_09 (from 0.1 to 0.9)
bool Set(size_t index, const Nn::ScalingInfo& si)
bool Set(size_t index, double minimum, double maximum)
bool SetName(size_t index, const wchar_t* name)
bool operator !=(const Nn::Scaler& init)const
bool operator ==(const Nn::Scaler& init)const
const wchar_t* GetName(size_t index)const
const wchar_t* ScaleFromStdRange(int stdRange, const MATRIX& input, MATRIX& output)const
stdRange: NN_STD_RANGE_NONE, NN_STD_RANGE_MINUS1_1 (from -1 to 1), NN_STD_RANGE_0_1 (from 0 to 1), NN_STD_RANGE_MINUS09_09 (from -0.9 to 0.9), NN_STD_RANGE_01_09 (from 0.1 to 0.9)
const wchar_t* ScaleTo11(const valarray<double >& input, valarray<double >& output)
This is for Kohonen networks. It scales to the range [-1.0, 1.0]. The output has an extra column for the synthetic input (initially set to zero).
const wchar_t* ScaleToStdRange(int stdRange, const MATRIX& input, MATRIX& output)const
stdRange: NN_STD_RANGE_NONE, NN_STD_RANGE_MINUS1_1 (from -1 to 1), NN_STD_RANGE_0_1 (from 0 to 1), NN_STD_RANGE_MINUS09_09 (from -0.9 to 0.9), NN_STD_RANGE_01_09 (from 0.1 to 0.9)
const wchar_t* ScaleTo11(const MATRIX& input, MATRIX& output)
This is for Kohonen networks. It scales to the range [-1, 1]. The output matrix has an extra column for the synthetic input (initially set to zero).
size_t GetCount() const
static double GetMaximumStdValue(int stdRange)
stdRange: NN_STD_RANGE_MINUS1_1 returns 1.0; NN_STD_RANGE_0_1 returns 1.0; NN_STD_RANGE_MINUS09_09 returns 0.9; NN_STD_RANGE_01_09 returns 0.9
static double GetMinimumStdValue(int stdRange)
stdRange: NN_STD_RANGE_MINUS1_1 returns -1.0; NN_STD_RANGE_0_1 returns 0.0; NN_STD_RANGE_MINUS09_09 returns -0.9; NN_STD_RANGE_01_09 returns 0.1
void Copy(const Nn::Scaler& init)
void Delete()
~ Scaler()
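
Example: scaling three input columns to a standard range; the column limits are illustrative. Under the usual min-max interpretation, each value presumably maps linearly from [minimum, maximum] to the standard range, e.g. to [-1, 1] for NN_STD_RANGE_MINUS1_1.

    #include <valarray>
    #include "Nn.h"   // assumed header
    using namespace std;

    void ScaleInputs()
    {
        Nn::Scaler scaler;
        if (scaler.Create(3) == false) return;
        scaler.Set(0, -10.0, 10.0);   // column 0 spans [-10, 10]
        scaler.Set(1, 0.0, 100.0);    // column 1 spans [0, 100]
        scaler.Set(2, 0.5, 2.5);      // column 2 spans [0.5, 2.5]
        valarray<double> raw(3), scaled(3);
        scaler.ScaleToStdRange(NN_STD_RANGE_MINUS1_1, raw, scaled);
    }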

ScalingInfo
ScalingInfo()
bool Load(Sys::File& file)
bool Save(Sys::File& file)const
~ ScalingInfo()

Tanh: High-performance class to compute y = tanh(1.5*x)
Tanh()
double Derivative(double y)const
double Func(double x)const
static double InverseFunc(double y)
static double InverseSoftFunc(double y)
~ Tanh()
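
Since y = tanh(1.5*x), the chain rule gives dy/dx = 1.5*(1 - y*y) in terms of the activation, and the inverse is x = atanh(y)/1.5; Derivative and InverseFunc presumably implement these identities. A small sketch (Nn.h is an assumed header name):

    #include "Nn.h"   // assumed header

    void TanhDemo()
    {
        Nn::Tanh tanhAct;
        double y = tanhAct.Func(0.2);            // y = tanh(1.5*0.2) = tanh(0.3), about 0.2913
        double dy = tanhAct.Derivative(y);       // chain rule: dy/dx = 1.5*(1.0 - y*y)
        double x = Nn::Tanh::InverseFunc(y);     // x = atanh(y)/1.5, recovers 0.2
    }
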
© Copyright 2000-2021 selo. All Rights Reserved. Jul 22 2021.